Linear Algebra


In [1]:
import mxnet as mx

Scalars


In [2]:
# Scalars are initialized as NDArrays with a single value
x = mx.nd.array([3.0])
y = mx.nd.array([2.0])

In [3]:
# Addition
x + y


Out[3]:
[5.]
<NDArray 1 @cpu(0)>

In [4]:
# Multiplication
x * y


Out[4]:
[6.]
<NDArray 1 @cpu(0)>

In [5]:
# Division
x / y


Out[5]:
[1.5]
<NDArray 1 @cpu(0)>

In [6]:
# Exponentiation: x raised to the power y
mx.nd.power(x, y)


Out[6]:
[9.]
<NDArray 1 @cpu(0)>

In [7]:
# Extract the value of a 1-element NDArray as a NumPy scalar
x.asscalar()


Out[7]:
3.0

In [8]:
# Note: asscalar() yields a NumPy float32, not a native Python float
type(x.asscalar())


Out[8]:
numpy.float32

Vectors


In [9]:
# Vector of 4 consecutive values starting at 0
u = mx.nd.arange(4)
u


Out[9]:
[0. 1. 2. 3.]
<NDArray 4 @cpu(0)>

In [10]:
# Access a single element by index (returned as a 1-element NDArray)
u[3]


Out[10]:
[3.]
<NDArray 1 @cpu(0)>

Length, dimensionality and shape


In [11]:
# Number of elements along the first axis
len(u)


Out[11]:
4

In [12]:
# Shape: a tuple with one entry per axis
u.shape


Out[12]:
(4,)

Matrices


In [13]:
# 5x4 matrix of zeros
A = mx.nd.zeros(shape = [5, 4])
A


Out[13]:
[[0. 0. 0. 0.]
 [0. 0. 0. 0.]
 [0. 0. 0. 0.]
 [0. 0. 0. 0.]
 [0. 0. 0. 0.]]
<NDArray 5x4 @cpu(0)>

In [14]:
# 20 consecutive values, to be reshaped into a matrix below
x = mx.nd.arange(start=0, stop=20)
x


Out[14]:
[ 0.  1.  2.  3.  4.  5.  6.  7.  8.  9. 10. 11. 12. 13. 14. 15. 16. 17.
 18. 19.]
<NDArray 20 @cpu(0)>

In [15]:
# Reshape the length-20 vector into a 5x4 matrix (row-major order)
A = x.reshape(shape=[5, 4])
A


Out[15]:
[[ 0.  1.  2.  3.]
 [ 4.  5.  6.  7.]
 [ 8.  9. 10. 11.]
 [12. 13. 14. 15.]
 [16. 17. 18. 19.]]
<NDArray 5x4 @cpu(0)>

In [16]:
# Specific value
A[2, 3]


Out[16]:
[11.]
<NDArray 1 @cpu(0)>

In [17]:
# Specific row
A[0, :]


Out[17]:
[0. 1. 2. 3.]
<NDArray 4 @cpu(0)>

In [18]:
# Specific column
A[:, 1]


Out[18]:
[ 1.  5.  9. 13. 17.]
<NDArray 5 @cpu(0)>

In [19]:
# Transpose: rows become columns, so A.T has shape 4x5
A.T


Out[19]:
[[ 0.  4.  8. 12. 16.]
 [ 1.  5.  9. 13. 17.]
 [ 2.  6. 10. 14. 18.]
 [ 3.  7. 11. 15. 19.]]
<NDArray 4x5 @cpu(0)>

Tensors


In [20]:
# 3-D tensor: 2 stacked matrices, each of shape 3x4
X = mx.nd.arange(start=0, stop=24).reshape(shape=[2, 3, 4])
X


Out[20]:
[[[ 0.  1.  2.  3.]
  [ 4.  5.  6.  7.]
  [ 8.  9. 10. 11.]]

 [[12. 13. 14. 15.]
  [16. 17. 18. 19.]
  [20. 21. 22. 23.]]]
<NDArray 2x3x4 @cpu(0)>

Element-wise operations


In [21]:
# Two vectors of the same shape for element-wise operations
u = mx.nd.array([1, 2, 3, 7])

In [22]:
# Same shape as u, filled with 2s
v = mx.nd.ones_like(u) * 2
v


Out[22]:
[2. 2. 2. 2.]
<NDArray 4 @cpu(0)>

In [23]:
# Element-wise addition
u + v


Out[23]:
[3. 4. 5. 9.]
<NDArray 4 @cpu(0)>

In [24]:
# Element-wise subtraction
u - v


Out[24]:
[-1.  0.  1.  5.]
<NDArray 4 @cpu(0)>

In [25]:
# Element-wise multiplication
u * v


Out[25]:
[ 2.  4.  6. 14.]
<NDArray 4 @cpu(0)>

In [26]:
# Element-wise division
u / v


Out[26]:
[0.5 1.  1.5 3.5]
<NDArray 4 @cpu(0)>

In [27]:
# The 5x4 matrix A defined earlier
A


Out[27]:
[[ 0.  1.  2.  3.]
 [ 4.  5.  6.  7.]
 [ 8.  9. 10. 11.]
 [12. 13. 14. 15.]
 [16. 17. 18. 19.]]
<NDArray 5x4 @cpu(0)>

In [28]:
# Same shape as A, filled with 3s
B = mx.nd.ones_like(A) * 3
B


Out[28]:
[[3. 3. 3. 3.]
 [3. 3. 3. 3.]
 [3. 3. 3. 3.]
 [3. 3. 3. 3.]
 [3. 3. 3. 3.]]
<NDArray 5x4 @cpu(0)>

In [29]:
# Element-wise (Hadamard) product of two matrices
A * B


Out[29]:
[[ 0.  3.  6.  9.]
 [12. 15. 18. 21.]
 [24. 27. 30. 33.]
 [36. 39. 42. 45.]
 [48. 51. 54. 57.]]
<NDArray 5x4 @cpu(0)>

In [30]:
# Element-wise matrix addition
A + B


Out[30]:
[[ 3.  4.  5.  6.]
 [ 7.  8.  9. 10.]
 [11. 12. 13. 14.]
 [15. 16. 17. 18.]
 [19. 20. 21. 22.]]
<NDArray 5x4 @cpu(0)>

Sum of all elements


In [31]:
# Recall A (5x4, values 0..19)
A


Out[31]:
[[ 0.  1.  2.  3.]
 [ 4.  5.  6.  7.]
 [ 8.  9. 10. 11.]
 [12. 13. 14. 15.]
 [16. 17. 18. 19.]]
<NDArray 5x4 @cpu(0)>

In [32]:
# Sum over all 20 entries
mx.nd.sum(A)


Out[32]:
[190.]
<NDArray 1 @cpu(0)>

Mean of all elements


In [33]:
# Mean = sum / number of elements = 190 / 20
mx.nd.mean(A)


Out[33]:
[9.5]
<NDArray 1 @cpu(0)>

Dot product


In [34]:
# The two vectors from the element-wise section
u


Out[34]:
[1. 2. 3. 7.]
<NDArray 4 @cpu(0)>

In [35]:
v


Out[35]:
[2. 2. 2. 2.]
<NDArray 4 @cpu(0)>

In [36]:
# Dot product: sum of the element-wise products of u and v
mx.nd.dot(u, v)


Out[36]:
[26.]
<NDArray 1 @cpu(0)>

Matrix-vector product


In [37]:
# 5x4 matrix A and length-4 vector u
A


Out[37]:
[[ 0.  1.  2.  3.]
 [ 4.  5.  6.  7.]
 [ 8.  9. 10. 11.]
 [12. 13. 14. 15.]
 [16. 17. 18. 19.]]
<NDArray 5x4 @cpu(0)>

In [38]:
u


Out[38]:
[1. 2. 3. 7.]
<NDArray 4 @cpu(0)>

In [39]:
# Matrix-vector product: each entry is the dot product of a row of A with u
mx.nd.dot(A, u)


Out[39]:
[ 29.  81. 133. 185. 237.]
<NDArray 5 @cpu(0)>

Matrix-matrix multiplication


In [40]:
# (3x4) dot (4x5) -> (3x5); each entry sums 4 ones, hence the 4s
A = mx.nd.ones(shape=(3, 4))
B = mx.nd.ones(shape=(4, 5))
C = mx.nd.dot(A, B)
C


Out[40]:
[[4. 4. 4. 4. 4.]
 [4. 4. 4. 4. 4.]
 [4. 4. 4. 4. 4.]]
<NDArray 3x5 @cpu(0)>

In [41]:
# The result keeps A's rows and B's columns
C.shape


Out[41]:
(3, 5)

L2 norm


In [42]:
# The vector from the earlier sections
u


Out[42]:
[1. 2. 3. 7.]
<NDArray 4 @cpu(0)>

In [43]:
# L2 norm: square root of the sum of the squared elements
mx.nd.norm(u)


Out[43]:
[7.937254]
<NDArray 1 @cpu(0)>

L1 norm


In [44]:
# L1 norm: the sum of the absolute values, l1 = sum(abs(u))
mx.nd.sum(mx.nd.abs(u))


Out[44]:
[13.]
<NDArray 1 @cpu(0)>